Generating Wikipedia articles using LSTMs & Tensorflow


In [1]:
# -*- coding: utf-8 -*-

# 1. Import the dependencies
import numpy as np
import tensorflow as tf
import random,datetime

In [2]:
# 2. Import the dataset
text = open('wiki.test.raw').read()
print 'Length of the text is {} '.format(len(text))


Length of the text is 1290590 

In [3]:
# 3. View the contents of the file
print 'Header : '
print text[:100]


Header : 
 
 = Robert Boulter = 
 
 Robert Boulter is an English film , television and theatre actor . He had 

In [4]:
# 4. Prepare the list of charecters used in the dataset.
chars = sorted(list(set(text)))
char_size = len(chars)
print 'Number of charecters : {}'.format(char_size)
print chars


Number of charecters : 178
['\n', ' ', '!', '"', '#', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?', '@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', ']', '^', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '\x80', '\x81', '\x82', '\x83', '\x84', '\x85', '\x86', '\x87', '\x88', '\x89', '\x8a', '\x8b', '\x8c', '\x8d', '\x8e', '\x8f', '\x90', '\x91', '\x92', '\x93', '\x94', '\x95', '\x96', '\x97', '\x98', '\x99', '\x9b', '\x9c', '\x9d', '\x9e', '\x9f', '\xa0', '\xa1', '\xa2', '\xa3', '\xa4', '\xa5', '\xa6', '\xa7', '\xa8', '\xa9', '\xaa', '\xab', '\xad', '\xae', '\xaf', '\xb0', '\xb1', '\xb2', '\xb3', '\xb4', '\xb5', '\xb6', '\xb7', '\xb8', '\xb9', '\xba', '\xbb', '\xbc', '\xbd', '\xbe', '\xbf', '\xc2', '\xc3', '\xc4', '\xc5', '\xc7', '\xc9', '\xca', '\xcb', '\xcc', '\xcd', '\xce', '\xcf', '\xd0', '\xd7', '\xd8', '\xd9', '\xda', '\xe0', '\xe1', '\xe2', '\xe3', '\xe4', '\xe5', '\xe6', '\xe7', '\xe8', '\xe9']

In [5]:
#5. Generate mappings so that we can quantify the data.
char2id=dict((c,i) for i,c in enumerate(chars))
id2char=dict((i,c) for i,c in enumerate(chars))

In [6]:
#6. Sample one charecter from the prediction probablity distribution. 
def sample(prediction):
    r = random.uniform(0,1)
    s =0
    char_id = len(prediction) - 1
    for i in range(len(prediction)):
        s  = s + prediction[i]
        if s >= r:
            char_id = i
            break
    char_one_hot = np.zeros(shape=[char_size])
    char_one_hot[char_id]=1.0
    return char_one_hot

In [7]:
#7. vectorize our data and feed it into model

len_per_section = 50
skip = 2
sections = []
next_chars = []

for i in range(0,len(text)-len_per_section,skip):
    sections.append(text[i:i+len_per_section])
    next_chars.append(text[i+len_per_section])

#Vectorize using numpy

X = np.zeros((len(sections), len_per_section), dtype=np.int32) #will be converted to one hot later

y = np.zeros((len(sections),char_size))
for i,section in enumerate(sections):
    X[i] = [char2id[x] for x in section]  #will be converted to one hot later
    y[i,char2id[next_chars[i]]] = 1

In [8]:
print X[0]


[ 1  0  1 30  1 51 77 64 67 80 82  1 35 77 83 74 82 67 80  1 30  1  0  1  0
  1 51 77 64 67 80 82  1 35 77 83 74 82 67 80  1 71 81  1 63 76  1 38 76 69]

In [9]:
#8. Initialize hyper parameters and checkpoint directory
batch_size = 32
max_steps=7000
log_every=100
save_every=400
hidden_nodes=1024
starting_text = 'i am thinking that'
checkpoint_directory='ckpt'

if tf.gfile.Exists(checkpoint_directory):
    tf.gfile.DeleteRecursively(checkpoint_directory)
tf.gfile.MakeDirs(checkpoint_directory)

In [10]:
#9. Segment which makes LSTMs from scratch.
graph=tf.Graph()
with graph.as_default():
    global_step=tf.Variable(0) #Number of batches seen so far
    batch_data_tensor = tf.placeholder(tf.int32,[batch_size,len_per_section]) # We will accept indices, convert to one hot later
    labels = tf.placeholder(tf.float32,[batch_size,char_size])
    data = tf.one_hot(batch_data_tensor, depth=char_size, dtype=tf.float32, axis=-1)

    #input gate, output gate, forget gate and internal state
    #they will be calculated
    
    #input gate
    w_ii = tf.Variable(tf.truncated_normal([char_size,hidden_nodes],-0.1,0.1))
    w_io = tf.Variable(tf.truncated_normal([hidden_nodes,hidden_nodes],-0.1,0.1))
    b_i = tf.Variable(tf.zeros([1,hidden_nodes]))
    
    #forget gate
    w_fi = tf.Variable(tf.truncated_normal([char_size,hidden_nodes],-0.1,0.1))
    w_fo = tf.Variable(tf.truncated_normal([hidden_nodes,hidden_nodes],-0.1,0.1))
    b_f = tf.Variable(tf.zeros([1,hidden_nodes]))
    
    #output gate
    w_oi = tf.Variable(tf.truncated_normal([char_size,hidden_nodes],-0.1,0.1))
    w_oo = tf.Variable(tf.truncated_normal([hidden_nodes,hidden_nodes],-0.1,0.1))
    b_o = tf.Variable(tf.zeros([1,hidden_nodes]))
    
    #memory cell
    w_ci = tf.Variable(tf.truncated_normal([char_size,hidden_nodes],-0.1,0.1))
    w_co = tf.Variable(tf.truncated_normal([hidden_nodes,hidden_nodes],-0.1,0.1))
    b_c = tf.Variable(tf.zeros([1,hidden_nodes]))
    
    def lstm(i,o,state):
        input_gate = tf.sigmoid(tf.matmul(i,w_ii)+tf.matmul(o,w_io)+b_i)
        forget_gate = tf.sigmoid(tf.matmul(i,w_fi) + tf.matmul(o,w_fo) + b_f)
        output_gate = tf.sigmoid(tf.matmul(i,w_oi) + tf.matmul(o,w_oo) + b_o)
        memory_cell = tf.sigmoid(tf.matmul(i,w_ci) + tf.matmul(o,w_co) + b_c)
        
        state = forget_gate * state + input_gate * memory_cell #What we want to forget from given, and what we want to remember from what we know
        output = output_gate*tf.tanh(state)
        return output,state
    
    output=tf.zeros([batch_size,hidden_nodes])
    state=tf.zeros([batch_size,hidden_nodes])

    for i in range(len_per_section):
        output,state = lstm(data[:,i,:],output,state)
        if i == 0:
            outputs_all_i = output
            labels_all_i=data[:,i+1,:]
        elif i != len_per_section -1:
            outputs_all_i=tf.concat(0,[outputs_all_i,output])
            labels_all_i=tf.concat(0,[labels_all_i,data[:,i+1,:]])
        else:
            outputs_all_i=tf.concat(0,[outputs_all_i,output])
            labels_all_i=tf.concat(0,[labels_all_i,labels])
    w=tf.Variable(tf.truncated_normal([hidden_nodes,char_size],-0.1,0.1))
    b=tf.Variable(tf.zeros([char_size]))
    
    logits = tf.matmul(outputs_all_i,w) + b
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits,labels_all_i))
    
    optimizer = tf.train.GradientDescentOptimizer(10.).minimize(loss, global_step=global_step)

In [11]:
with tf.Session(graph=graph) as sess:
    tf.initialize_all_variables().run()
    offset = 0
    saver = tf.train.Saver()
    for step in range(max_steps):
        offset = offset % len(X)
        if offset <=(len(X) - batch_size):
            batch_data=X[offset:offset+batch_size]
            batch_labels=y[offset:offset+batch_size]
            offset += batch_size
        else:
            to_add = batch_size - (len(X) - offset)
            batch_data = np.concatenate((X[offset:len(X)],X[0:to_add]))
            batch_labels = np.concatenate((y[offset:len(X)],y[0:to_add]))
            offset = to_add
        #print batch_labels.shape
        _,training_loss = sess.run([optimizer,loss],feed_dict={batch_data_tensor:batch_data,labels:batch_labels})
        
        if step % 10 == 0:
            print 'Training loss at step %d: %.2f (%s)' % (step, training_loss, datetime.datetime.now())
            if step % save_every == 0:
                saver.save(sess, checkpoint_directory + '/model', global_step=step)
                print 'Model Saved!'


Training loss at step 0: 5.06 (2017-06-06 16:29:16.747130)
Model Saved!
Training loss at step 10: 6.17 (2017-06-06 16:29:19.713666)
Training loss at step 20: 4.04 (2017-06-06 16:29:21.672587)
Training loss at step 30: 3.41 (2017-06-06 16:29:23.540309)
Training loss at step 40: 3.41 (2017-06-06 16:29:25.544886)
Training loss at step 50: 3.32 (2017-06-06 16:29:27.556183)
Training loss at step 60: 3.41 (2017-06-06 16:29:29.518625)
Training loss at step 70: 3.28 (2017-06-06 16:29:31.524406)
Training loss at step 80: 2.98 (2017-06-06 16:29:33.503908)
Training loss at step 90: 4.74 (2017-06-06 16:29:35.474369)
Training loss at step 100: 3.46 (2017-06-06 16:29:37.432457)
Training loss at step 110: 3.15 (2017-06-06 16:29:39.303156)
Training loss at step 120: 2.96 (2017-06-06 16:29:41.243615)
Training loss at step 130: 2.90 (2017-06-06 16:29:43.134557)
Training loss at step 140: 3.06 (2017-06-06 16:29:44.998523)
Training loss at step 150: 2.87 (2017-06-06 16:29:46.862905)
Training loss at step 160: 2.94 (2017-06-06 16:29:48.727404)
Training loss at step 170: 3.77 (2017-06-06 16:29:50.592184)
Training loss at step 180: 3.52 (2017-06-06 16:29:52.475884)
Training loss at step 190: 2.82 (2017-06-06 16:29:54.365618)
Training loss at step 200: 3.06 (2017-06-06 16:29:56.255361)
Training loss at step 210: 2.96 (2017-06-06 16:29:58.137763)
Training loss at step 220: 3.16 (2017-06-06 16:30:00.024638)
Training loss at step 230: 2.92 (2017-06-06 16:30:01.919526)
Training loss at step 240: 3.28 (2017-06-06 16:30:03.812139)
Training loss at step 250: 3.24 (2017-06-06 16:30:05.707581)
Training loss at step 260: 2.90 (2017-06-06 16:30:07.586323)
Training loss at step 270: 2.66 (2017-06-06 16:30:09.449446)
Training loss at step 280: 3.45 (2017-06-06 16:30:11.305527)
Training loss at step 290: 3.06 (2017-06-06 16:30:13.160127)
Training loss at step 300: 2.99 (2017-06-06 16:30:15.018618)
Training loss at step 310: 3.08 (2017-06-06 16:30:16.889234)
Training loss at step 320: 2.80 (2017-06-06 16:30:18.856633)
Training loss at step 330: 3.20 (2017-06-06 16:30:20.741575)
Training loss at step 340: 2.84 (2017-06-06 16:30:22.612048)
Training loss at step 350: 2.96 (2017-06-06 16:30:24.469726)
Training loss at step 360: 3.14 (2017-06-06 16:30:26.330089)
Training loss at step 370: 2.80 (2017-06-06 16:30:28.185207)
Training loss at step 380: 3.03 (2017-06-06 16:30:30.040433)
Training loss at step 390: 2.89 (2017-06-06 16:30:31.896552)
Training loss at step 400: 3.16 (2017-06-06 16:30:33.764444)
Model Saved!
Training loss at step 410: 3.11 (2017-06-06 16:30:36.922015)
Training loss at step 420: 2.93 (2017-06-06 16:30:38.782579)
Training loss at step 430: 3.74 (2017-06-06 16:30:40.668490)
Training loss at step 440: 2.96 (2017-06-06 16:30:42.660330)
Training loss at step 450: 2.78 (2017-06-06 16:30:44.730486)
Training loss at step 460: 3.13 (2017-06-06 16:30:46.639868)
Training loss at step 470: 2.93 (2017-06-06 16:30:48.496764)
Training loss at step 480: 2.84 (2017-06-06 16:30:50.353108)
Training loss at step 490: 3.20 (2017-06-06 16:30:52.210426)
Training loss at step 500: 3.10 (2017-06-06 16:30:54.070775)
Training loss at step 510: 3.07 (2017-06-06 16:30:56.006782)
Training loss at step 520: 3.12 (2017-06-06 16:30:57.861780)
Training loss at step 530: 3.05 (2017-06-06 16:30:59.773949)
Training loss at step 540: 2.86 (2017-06-06 16:31:01.705938)
Training loss at step 550: 3.04 (2017-06-06 16:31:03.713433)
Training loss at step 560: 3.07 (2017-06-06 16:31:05.620660)
Training loss at step 570: 3.23 (2017-06-06 16:31:07.582486)
Training loss at step 580: 3.04 (2017-06-06 16:31:09.446795)
Training loss at step 590: 3.06 (2017-06-06 16:31:11.390659)
Training loss at step 600: 2.86 (2017-06-06 16:31:13.332211)
Training loss at step 610: 2.90 (2017-06-06 16:31:15.336801)
Training loss at step 620: 2.94 (2017-06-06 16:31:17.290535)
Training loss at step 630: 2.90 (2017-06-06 16:31:19.259230)
Training loss at step 640: 3.15 (2017-06-06 16:31:21.204201)
Training loss at step 650: 2.79 (2017-06-06 16:31:23.108477)
Training loss at step 660: 3.14 (2017-06-06 16:31:25.008943)
Training loss at step 670: 3.59 (2017-06-06 16:31:27.018039)
Training loss at step 680: 2.92 (2017-06-06 16:31:29.007560)
Training loss at step 690: 2.72 (2017-06-06 16:31:30.973245)
Training loss at step 700: 2.76 (2017-06-06 16:31:32.943634)
Training loss at step 710: 3.25 (2017-06-06 16:31:34.916868)
Training loss at step 720: 3.02 (2017-06-06 16:31:36.882005)
Training loss at step 730: 2.85 (2017-06-06 16:31:38.846597)
Training loss at step 740: 2.98 (2017-06-06 16:31:40.883347)
Training loss at step 750: 2.75 (2017-06-06 16:31:42.798786)
Training loss at step 760: 3.53 (2017-06-06 16:31:44.671278)
Training loss at step 770: 3.01 (2017-06-06 16:31:46.539644)
Training loss at step 780: 2.85 (2017-06-06 16:31:48.433754)
Training loss at step 790: 2.80 (2017-06-06 16:31:50.329373)
Training loss at step 800: 3.21 (2017-06-06 16:31:52.243633)
Model Saved!
Training loss at step 810: 2.78 (2017-06-06 16:31:54.946182)
Training loss at step 820: 3.30 (2017-06-06 16:31:56.933745)
Training loss at step 830: 2.83 (2017-06-06 16:31:58.899419)
Training loss at step 840: 3.18 (2017-06-06 16:32:00.902400)
Training loss at step 850: 3.09 (2017-06-06 16:32:02.910357)
Training loss at step 860: 2.70 (2017-06-06 16:32:04.944052)
Training loss at step 870: 2.86 (2017-06-06 16:32:06.798561)
Training loss at step 880: 2.71 (2017-06-06 16:32:08.650937)
Training loss at step 890: 2.67 (2017-06-06 16:32:10.504161)
Training loss at step 900: 2.92 (2017-06-06 16:32:12.354173)
Training loss at step 910: 2.68 (2017-06-06 16:32:14.300669)
Training loss at step 920: 2.78 (2017-06-06 16:32:16.286812)
Training loss at step 930: 2.61 (2017-06-06 16:32:18.138091)
Training loss at step 940: 2.85 (2017-06-06 16:32:20.074307)
Training loss at step 950: 3.10 (2017-06-06 16:32:21.938898)
Training loss at step 960: 2.84 (2017-06-06 16:32:23.802572)
Training loss at step 970: 2.90 (2017-06-06 16:32:25.692140)
Training loss at step 980: 2.54 (2017-06-06 16:32:27.584120)
Training loss at step 990: 2.88 (2017-06-06 16:32:29.449149)
Training loss at step 1000: 2.70 (2017-06-06 16:32:31.317384)
Training loss at step 1010: 2.41 (2017-06-06 16:32:33.184736)
Training loss at step 1020: 2.45 (2017-06-06 16:32:35.048633)
Training loss at step 1030: 2.69 (2017-06-06 16:32:36.913418)
Training loss at step 1040: 2.94 (2017-06-06 16:32:38.771051)
Training loss at step 1050: 2.65 (2017-06-06 16:32:40.641006)
Training loss at step 1060: 2.68 (2017-06-06 16:32:42.503604)
Training loss at step 1070: 3.19 (2017-06-06 16:32:44.438586)
Training loss at step 1080: 2.75 (2017-06-06 16:32:46.373955)
Training loss at step 1090: 2.62 (2017-06-06 16:32:48.346692)
Training loss at step 1100: 2.77 (2017-06-06 16:32:50.335690)
Training loss at step 1110: 2.54 (2017-06-06 16:32:52.301265)
Training loss at step 1120: 2.69 (2017-06-06 16:32:54.267527)
Training loss at step 1130: 2.40 (2017-06-06 16:32:56.239703)
Training loss at step 1140: 2.61 (2017-06-06 16:32:58.209973)
Training loss at step 1150: 2.46 (2017-06-06 16:33:00.200945)
Training loss at step 1160: 2.57 (2017-06-06 16:33:02.166581)
Training loss at step 1170: 2.48 (2017-06-06 16:33:04.109771)
Training loss at step 1180: 2.51 (2017-06-06 16:33:06.082931)
Training loss at step 1190: 2.37 (2017-06-06 16:33:08.112502)
Training loss at step 1200: 2.33 (2017-06-06 16:33:10.152233)
Model Saved!
Training loss at step 1210: 2.54 (2017-06-06 16:33:13.729541)
Training loss at step 1220: 2.37 (2017-06-06 16:33:15.699646)
Training loss at step 1230: 2.83 (2017-06-06 16:33:17.678172)
Training loss at step 1240: 2.58 (2017-06-06 16:33:19.673766)
Training loss at step 1250: 2.81 (2017-06-06 16:33:21.651983)
Training loss at step 1260: 2.57 (2017-06-06 16:33:23.587435)
Training loss at step 1270: 2.57 (2017-06-06 16:33:25.446994)
Training loss at step 1280: 2.28 (2017-06-06 16:33:27.398999)
Training loss at step 1290: 2.81 (2017-06-06 16:33:29.366586)
Training loss at step 1300: 2.39 (2017-06-06 16:33:31.227420)
Training loss at step 1310: 2.95 (2017-06-06 16:33:33.095758)
Training loss at step 1320: 2.50 (2017-06-06 16:33:34.980187)
Training loss at step 1330: 2.30 (2017-06-06 16:33:36.865104)
Training loss at step 1340: 2.62 (2017-06-06 16:33:38.756364)
Training loss at step 1350: 2.24 (2017-06-06 16:33:40.634808)
Training loss at step 1360: 2.89 (2017-06-06 16:33:42.519012)
Training loss at step 1370: 2.63 (2017-06-06 16:33:44.408126)
Training loss at step 1380: 2.37 (2017-06-06 16:33:46.268771)
Training loss at step 1390: 2.82 (2017-06-06 16:33:48.125773)
Training loss at step 1400: 2.58 (2017-06-06 16:33:49.981020)
Training loss at step 1410: 2.41 (2017-06-06 16:33:51.833993)
Training loss at step 1420: 2.54 (2017-06-06 16:33:53.725602)
Training loss at step 1430: 2.59 (2017-06-06 16:33:55.588265)
Training loss at step 1440: 2.52 (2017-06-06 16:33:57.460213)
Training loss at step 1450: 2.64 (2017-06-06 16:33:59.325653)
Training loss at step 1460: 2.60 (2017-06-06 16:34:01.201616)
Training loss at step 1470: 2.72 (2017-06-06 16:34:03.070406)
Training loss at step 1480: 2.61 (2017-06-06 16:34:04.944023)
Training loss at step 1490: 2.40 (2017-06-06 16:34:06.829349)
Training loss at step 1500: 2.60 (2017-06-06 16:34:08.705646)
Training loss at step 1510: 2.63 (2017-06-06 16:34:10.571300)
Training loss at step 1520: 2.31 (2017-06-06 16:34:12.453190)
Training loss at step 1530: 2.11 (2017-06-06 16:34:14.420996)
Training loss at step 1540: 2.12 (2017-06-06 16:34:16.359610)
Training loss at step 1550: 2.24 (2017-06-06 16:34:18.352802)
Training loss at step 1560: 2.32 (2017-06-06 16:34:20.293837)
Training loss at step 1570: 2.22 (2017-06-06 16:34:22.270073)
Training loss at step 1580: 2.15 (2017-06-06 16:34:24.250502)
Training loss at step 1590: 2.39 (2017-06-06 16:34:26.115694)
Training loss at step 1600: 2.34 (2017-06-06 16:34:27.970254)
Model Saved!
Training loss at step 1610: 2.30 (2017-06-06 16:34:30.629766)
Training loss at step 1620: 2.31 (2017-06-06 16:34:32.507144)
Training loss at step 1630: 2.47 (2017-06-06 16:34:34.365616)
Training loss at step 1640: 2.26 (2017-06-06 16:34:36.224851)
Training loss at step 1650: 2.21 (2017-06-06 16:34:38.094278)
Training loss at step 1660: 2.23 (2017-06-06 16:34:39.970645)
Training loss at step 1670: 2.17 (2017-06-06 16:34:41.858944)
Training loss at step 1680: 2.00 (2017-06-06 16:34:43.727519)
Training loss at step 1690: 2.17 (2017-06-06 16:34:45.647192)
Training loss at step 1700: 2.47 (2017-06-06 16:34:47.503538)
Training loss at step 1710: 2.21 (2017-06-06 16:34:49.364962)
Training loss at step 1720: 2.44 (2017-06-06 16:34:51.329714)
Training loss at step 1730: 2.15 (2017-06-06 16:34:53.264306)
Training loss at step 1740: 2.20 (2017-06-06 16:34:55.193238)
Training loss at step 1750: 2.05 (2017-06-06 16:34:57.141791)
Training loss at step 1760: 1.95 (2017-06-06 16:34:59.076330)
Training loss at step 1770: 2.04 (2017-06-06 16:35:01.047801)
Training loss at step 1780: 2.27 (2017-06-06 16:35:02.955999)
Training loss at step 1790: 2.33 (2017-06-06 16:35:04.833069)
Training loss at step 1800: 2.31 (2017-06-06 16:35:06.706194)
Training loss at step 1810: 2.11 (2017-06-06 16:35:08.585788)
Training loss at step 1820: 2.08 (2017-06-06 16:35:10.521625)
Training loss at step 1830: 2.20 (2017-06-06 16:35:12.374926)
Training loss at step 1840: 2.11 (2017-06-06 16:35:14.228019)
Training loss at step 1850: 2.09 (2017-06-06 16:35:16.082101)
Training loss at step 1860: 1.89 (2017-06-06 16:35:17.959402)
Training loss at step 1870: 2.98 (2017-06-06 16:35:19.843737)
Training loss at step 1880: 1.99 (2017-06-06 16:35:21.728735)
Training loss at step 1890: 2.04 (2017-06-06 16:35:23.611040)
Training loss at step 1900: 2.42 (2017-06-06 16:35:25.498018)
Training loss at step 1910: 2.73 (2017-06-06 16:35:27.362510)
Training loss at step 1920: 2.34 (2017-06-06 16:35:29.248436)
Training loss at step 1930: 2.15 (2017-06-06 16:35:31.145827)
Training loss at step 1940: 2.14 (2017-06-06 16:35:33.004174)
Training loss at step 1950: 2.46 (2017-06-06 16:35:34.914423)
Training loss at step 1960: 1.87 (2017-06-06 16:35:36.970311)
Training loss at step 1970: 1.97 (2017-06-06 16:35:38.874127)
Training loss at step 1980: 1.94 (2017-06-06 16:35:40.923094)
Training loss at step 1990: 2.10 (2017-06-06 16:35:42.838076)
Training loss at step 2000: 2.19 (2017-06-06 16:35:44.738989)
Model Saved!
Training loss at step 2010: 1.81 (2017-06-06 16:35:47.629010)
Training loss at step 2020: 1.94 (2017-06-06 16:35:49.501003)
Training loss at step 2030: 2.07 (2017-06-06 16:35:51.348352)
Training loss at step 2040: 1.89 (2017-06-06 16:35:53.237360)
Training loss at step 2050: 2.35 (2017-06-06 16:35:55.085465)
Training loss at step 2060: 1.93 (2017-06-06 16:35:56.936668)
Training loss at step 2070: 2.54 (2017-06-06 16:35:58.785365)
Training loss at step 2080: 2.26 (2017-06-06 16:36:00.667863)
Training loss at step 2090: 2.10 (2017-06-06 16:36:02.518659)
Training loss at step 2100: 2.03 (2017-06-06 16:36:04.390983)
Training loss at step 2110: 2.13 (2017-06-06 16:36:06.258302)
Training loss at step 2120: 2.23 (2017-06-06 16:36:08.160409)
Training loss at step 2130: 1.85 (2017-06-06 16:36:10.080147)
Training loss at step 2140: 1.97 (2017-06-06 16:36:11.968014)
Training loss at step 2150: 2.11 (2017-06-06 16:36:13.819948)
Training loss at step 2160: 2.49 (2017-06-06 16:36:15.671043)
Training loss at step 2170: 1.94 (2017-06-06 16:36:17.517706)
Training loss at step 2180: 2.08 (2017-06-06 16:36:19.365561)
Training loss at step 2190: 2.05 (2017-06-06 16:36:21.265220)
Training loss at step 2200: 1.97 (2017-06-06 16:36:23.183602)
Training loss at step 2210: 1.98 (2017-06-06 16:36:25.031197)
Training loss at step 2220: 2.03 (2017-06-06 16:36:26.891066)
Training loss at step 2230: 1.94 (2017-06-06 16:36:28.784910)
Training loss at step 2240: 1.92 (2017-06-06 16:36:30.650939)
Training loss at step 2250: 1.86 (2017-06-06 16:36:32.636896)
Training loss at step 2260: 2.23 (2017-06-06 16:36:34.631850)
Training loss at step 2270: 1.77 (2017-06-06 16:36:36.565542)
Training loss at step 2280: 2.21 (2017-06-06 16:36:38.541437)
Training loss at step 2290: 1.70 (2017-06-06 16:36:40.514615)
Training loss at step 2300: 2.42 (2017-06-06 16:36:42.430700)
Training loss at step 2310: 1.85 (2017-06-06 16:36:44.287126)
Training loss at step 2320: 2.20 (2017-06-06 16:36:46.160500)
Training loss at step 2330: 1.52 (2017-06-06 16:36:48.043332)
Training loss at step 2340: 1.79 (2017-06-06 16:36:49.901941)
Training loss at step 2350: 2.00 (2017-06-06 16:36:51.859484)
Training loss at step 2360: 2.01 (2017-06-06 16:36:53.825106)
Training loss at step 2370: 1.91 (2017-06-06 16:36:55.772366)
Training loss at step 2380: 2.31 (2017-06-06 16:36:57.765035)
Training loss at step 2390: 2.23 (2017-06-06 16:36:59.643324)
Training loss at step 2400: 1.94 (2017-06-06 16:37:01.561029)
Model Saved!
Training loss at step 2410: 1.75 (2017-06-06 16:37:04.183132)
Training loss at step 2420: 2.12 (2017-06-06 16:37:06.040547)
Training loss at step 2430: 1.99 (2017-06-06 16:37:08.097062)
Training loss at step 2440: 2.14 (2017-06-06 16:37:10.172728)
Training loss at step 2450: 2.07 (2017-06-06 16:37:12.244045)
Training loss at step 2460: 2.31 (2017-06-06 16:37:14.220660)
Training loss at step 2470: 2.15 (2017-06-06 16:37:16.206482)
Training loss at step 2480: 2.68 (2017-06-06 16:37:18.180379)
Training loss at step 2490: 2.49 (2017-06-06 16:37:20.144199)
Training loss at step 2500: 2.10 (2017-06-06 16:37:22.136101)
Training loss at step 2510: 2.22 (2017-06-06 16:37:24.092529)
Training loss at step 2520: 2.29 (2017-06-06 16:37:25.970740)
Training loss at step 2530: 2.54 (2017-06-06 16:37:27.840665)
Training loss at step 2540: 2.29 (2017-06-06 16:37:29.723559)
Training loss at step 2550: 2.45 (2017-06-06 16:37:31.606334)
Training loss at step 2560: 2.20 (2017-06-06 16:37:33.463122)
Training loss at step 2570: 2.18 (2017-06-06 16:37:35.322499)
Training loss at step 2580: 2.25 (2017-06-06 16:37:37.179756)
Training loss at step 2590: 2.04 (2017-06-06 16:37:39.037264)
Training loss at step 2600: 2.57 (2017-06-06 16:37:40.964061)
Training loss at step 2610: 2.10 (2017-06-06 16:37:42.830268)
Training loss at step 2620: 2.28 (2017-06-06 16:37:44.688148)
Training loss at step 2630: 2.35 (2017-06-06 16:37:46.541868)
Training loss at step 2640: 2.33 (2017-06-06 16:37:48.485867)
Training loss at step 2650: 2.25 (2017-06-06 16:37:50.423788)
Training loss at step 2660: 2.28 (2017-06-06 16:37:52.396717)
Training loss at step 2670: 2.15 (2017-06-06 16:37:54.367324)
Training loss at step 2680: 2.14 (2017-06-06 16:37:56.381611)
Training loss at step 2690: 2.34 (2017-06-06 16:37:58.484238)
Training loss at step 2700: 2.18 (2017-06-06 16:38:00.534450)
Training loss at step 2710: 1.84 (2017-06-06 16:38:02.583580)
Training loss at step 2720: 2.20 (2017-06-06 16:38:04.651088)
Training loss at step 2730: 1.95 (2017-06-06 16:38:06.615645)
Training loss at step 2740: 1.90 (2017-06-06 16:38:08.550790)
Training loss at step 2750: 2.39 (2017-06-06 16:38:10.487670)
Training loss at step 2760: 2.09 (2017-06-06 16:38:12.345419)
Training loss at step 2770: 1.91 (2017-06-06 16:38:14.199313)
Training loss at step 2780: 2.03 (2017-06-06 16:38:16.077961)
Training loss at step 2790: 1.85 (2017-06-06 16:38:17.960292)
Training loss at step 2800: 2.00 (2017-06-06 16:38:19.818368)
Model Saved!
Training loss at step 2810: 2.36 (2017-06-06 16:38:23.237591)
Training loss at step 2820: 1.76 (2017-06-06 16:38:25.107234)
Training loss at step 2830: 2.15 (2017-06-06 16:38:26.980163)
Training loss at step 2840: 2.02 (2017-06-06 16:38:28.861853)
Training loss at step 2850: 1.95 (2017-06-06 16:38:30.755611)
Training loss at step 2860: 2.31 (2017-06-06 16:38:32.629071)
Training loss at step 2870: 2.03 (2017-06-06 16:38:34.498801)
Training loss at step 2880: 1.81 (2017-06-06 16:38:36.356096)
Training loss at step 2890: 1.88 (2017-06-06 16:38:38.210673)
Training loss at step 2900: 2.00 (2017-06-06 16:38:40.071256)
Training loss at step 2910: 2.13 (2017-06-06 16:38:41.957608)
Training loss at step 2920: 1.93 (2017-06-06 16:38:43.904921)
Training loss at step 2930: 2.07 (2017-06-06 16:38:45.758807)
Training loss at step 2940: 2.14 (2017-06-06 16:38:47.611084)
Training loss at step 2950: 2.08 (2017-06-06 16:38:49.468199)
Training loss at step 2960: 2.11 (2017-06-06 16:38:51.350296)
Training loss at step 2970: 2.18 (2017-06-06 16:38:53.233867)
Training loss at step 2980: 2.50 (2017-06-06 16:38:55.088865)
Training loss at step 2990: 1.81 (2017-06-06 16:38:56.960114)
Training loss at step 3000: 2.59 (2017-06-06 16:38:58.842998)
Training loss at step 3010: 1.71 (2017-06-06 16:39:00.722511)
Training loss at step 3020: 2.13 (2017-06-06 16:39:02.602357)
Training loss at step 3030: 1.92 (2017-06-06 16:39:04.464836)
Training loss at step 3040: 1.89 (2017-06-06 16:39:06.353027)
Training loss at step 3050: 2.23 (2017-06-06 16:39:08.328366)
Training loss at step 3060: 1.86 (2017-06-06 16:39:10.253978)
Training loss at step 3070: 2.03 (2017-06-06 16:39:12.185739)
Training loss at step 3080: 1.76 (2017-06-06 16:39:14.174223)
Training loss at step 3090: 2.22 (2017-06-06 16:39:16.056503)
Training loss at step 3100: 1.89 (2017-06-06 16:39:17.931200)
Training loss at step 3110: 2.13 (2017-06-06 16:39:19.795634)
Training loss at step 3120: 1.91 (2017-06-06 16:39:21.676245)
Training loss at step 3130: 2.07 (2017-06-06 16:39:23.576694)
Training loss at step 3140: 2.01 (2017-06-06 16:39:25.465109)
Training loss at step 3150: 2.03 (2017-06-06 16:39:27.349628)
Training loss at step 3160: 1.92 (2017-06-06 16:39:29.229961)
Training loss at step 3170: 1.76 (2017-06-06 16:39:31.082646)
Training loss at step 3180: 1.88 (2017-06-06 16:39:32.937408)
Training loss at step 3190: 1.94 (2017-06-06 16:39:34.810970)
Training loss at step 3200: 2.17 (2017-06-06 16:39:36.684857)
Model Saved!
Training loss at step 3210: 2.05 (2017-06-06 16:39:39.254623)
Training loss at step 3220: 2.08 (2017-06-06 16:39:41.109834)
Training loss at step 3230: 1.99 (2017-06-06 16:39:42.965941)
Training loss at step 3240: 1.61 (2017-06-06 16:39:44.819488)
Training loss at step 3250: 2.19 (2017-06-06 16:39:46.671890)
Training loss at step 3260: 1.90 (2017-06-06 16:39:48.615639)
Training loss at step 3270: 2.09 (2017-06-06 16:39:50.550088)
Training loss at step 3280: 2.04 (2017-06-06 16:39:52.522158)
Training loss at step 3290: 1.92 (2017-06-06 16:39:54.508491)
Training loss at step 3300: 2.21 (2017-06-06 16:39:56.457975)
Training loss at step 3310: 2.02 (2017-06-06 16:39:58.404999)
Training loss at step 3320: 2.22 (2017-06-06 16:40:00.381859)
Training loss at step 3330: 2.10 (2017-06-06 16:40:02.337196)
Training loss at step 3340: 2.10 (2017-06-06 16:40:04.273323)
Training loss at step 3350: 2.05 (2017-06-06 16:40:06.261523)
Training loss at step 3360: 2.06 (2017-06-06 16:40:08.247475)
Training loss at step 3370: 1.92 (2017-06-06 16:40:10.197968)
Training loss at step 3380: 2.26 (2017-06-06 16:40:12.208540)
Training loss at step 3390: 1.83 (2017-06-06 16:40:14.185038)
Training loss at step 3400: 2.43 (2017-06-06 16:40:16.134816)
Training loss at step 3410: 2.01 (2017-06-06 16:40:18.098272)
Training loss at step 3420: 2.17 (2017-06-06 16:40:20.107690)
Training loss at step 3430: 2.62 (2017-06-06 16:40:22.053411)
Training loss at step 3440: 1.90 (2017-06-06 16:40:23.925579)
Training loss at step 3450: 2.03 (2017-06-06 16:40:25.779549)
Training loss at step 3460: 2.24 (2017-06-06 16:40:27.649923)
Training loss at step 3470: 1.81 (2017-06-06 16:40:29.637538)
Training loss at step 3480: 2.15 (2017-06-06 16:40:31.578480)
Training loss at step 3490: 1.86 (2017-06-06 16:40:33.431130)
Training loss at step 3500: 2.07 (2017-06-06 16:40:35.314539)
Training loss at step 3510: 1.93 (2017-06-06 16:40:37.203203)
Training loss at step 3520: 2.76 (2017-06-06 16:40:39.084308)
Training loss at step 3530: 2.05 (2017-06-06 16:40:40.962081)
Training loss at step 3540: 2.03 (2017-06-06 16:40:42.847608)
Training loss at step 3550: 2.25 (2017-06-06 16:40:44.793385)
Training loss at step 3560: 2.05 (2017-06-06 16:40:46.752827)
Training loss at step 3570: 2.10 (2017-06-06 16:40:48.683623)
Training loss at step 3580: 2.00 (2017-06-06 16:40:50.668559)
Training loss at step 3590: 2.16 (2017-06-06 16:40:52.706356)
Training loss at step 3600: 2.05 (2017-06-06 16:40:54.658594)
Model Saved!
Training loss at step 3610: 1.55 (2017-06-06 16:40:57.426073)
Training loss at step 3620: 1.92 (2017-06-06 16:40:59.360314)
Training loss at step 3630: 1.94 (2017-06-06 16:41:01.242444)
Training loss at step 3640: 2.25 (2017-06-06 16:41:03.117410)
Training loss at step 3650: 2.31 (2017-06-06 16:41:05.000718)
Training loss at step 3660: 2.08 (2017-06-06 16:41:06.872449)
Training loss at step 3670: 2.16 (2017-06-06 16:41:08.741613)
Training loss at step 3680: 2.46 (2017-06-06 16:41:10.622255)
Training loss at step 3690: 2.03 (2017-06-06 16:41:12.490361)
Training loss at step 3700: 1.96 (2017-06-06 16:41:14.343502)
Training loss at step 3710: 2.22 (2017-06-06 16:41:16.199349)
Training loss at step 3720: 2.09 (2017-06-06 16:41:18.053952)
Training loss at step 3730: 2.12 (2017-06-06 16:41:19.906926)
Training loss at step 3740: 2.17 (2017-06-06 16:41:21.761292)
Training loss at step 3750: 2.12 (2017-06-06 16:41:23.614273)
Training loss at step 3760: 2.13 (2017-06-06 16:41:25.466339)
Training loss at step 3770: 2.09 (2017-06-06 16:41:27.320232)
Training loss at step 3780: 2.07 (2017-06-06 16:41:29.174078)
Training loss at step 3790: 1.91 (2017-06-06 16:41:31.054451)
Training loss at step 3800: 2.19 (2017-06-06 16:41:33.025156)
Training loss at step 3810: 2.17 (2017-06-06 16:41:34.971538)
Training loss at step 3820: 1.79 (2017-06-06 16:41:36.918988)
Training loss at step 3830: 2.07 (2017-06-06 16:41:38.868419)
Training loss at step 3840: 2.14 (2017-06-06 16:41:40.828074)
Training loss at step 3850: 2.27 (2017-06-06 16:41:42.782980)
Training loss at step 3860: 1.94 (2017-06-06 16:41:44.739214)
Training loss at step 3870: 2.13 (2017-06-06 16:41:46.609431)
Training loss at step 3880: 2.25 (2017-06-06 16:41:48.474467)
Training loss at step 3890: 2.07 (2017-06-06 16:41:50.426597)
Training loss at step 3900: 2.18 (2017-06-06 16:41:52.442551)
Training loss at step 3910: 1.73 (2017-06-06 16:41:54.424638)
Training loss at step 3920: 2.24 (2017-06-06 16:41:56.369728)
Training loss at step 3930: 2.33 (2017-06-06 16:41:58.231585)
Training loss at step 3940: 2.18 (2017-06-06 16:42:00.198051)
Training loss at step 3950: 2.18 (2017-06-06 16:42:02.167980)
Training loss at step 3960: 2.21 (2017-06-06 16:42:04.131907)
Training loss at step 3970: 2.17 (2017-06-06 16:42:06.116639)
Training loss at step 3980: 2.14 (2017-06-06 16:42:08.072315)
Training loss at step 3990: 1.85 (2017-06-06 16:42:10.013914)
Training loss at step 4000: 2.01 (2017-06-06 16:42:11.979829)
Model Saved!
Training loss at step 4010: 1.99 (2017-06-06 16:42:15.074986)
Training loss at step 4020: 1.78 (2017-06-06 16:42:17.034124)
Training loss at step 4030: 2.07 (2017-06-06 16:42:19.004183)
Training loss at step 4040: 2.09 (2017-06-06 16:42:20.985180)
Training loss at step 4050: 1.74 (2017-06-06 16:42:22.932344)
Training loss at step 4060: 2.22 (2017-06-06 16:42:24.885978)
Training loss at step 4070: 1.98 (2017-06-06 16:42:26.886962)
Training loss at step 4080: 1.86 (2017-06-06 16:42:28.846675)
Training loss at step 4090: 1.93 (2017-06-06 16:42:30.811665)
Training loss at step 4100: 2.21 (2017-06-06 16:42:32.672803)
Training loss at step 4110: 1.90 (2017-06-06 16:42:34.528015)
Training loss at step 4120: 1.89 (2017-06-06 16:42:36.380324)
Training loss at step 4130: 1.78 (2017-06-06 16:42:38.232499)
Training loss at step 4140: 2.19 (2017-06-06 16:42:40.100189)
Training loss at step 4150: 1.65 (2017-06-06 16:42:41.955352)
Training loss at step 4160: 2.17 (2017-06-06 16:42:43.818197)
Training loss at step 4170: 1.78 (2017-06-06 16:42:45.674413)
Training loss at step 4180: 2.08 (2017-06-06 16:42:47.526537)
Training loss at step 4190: 2.16 (2017-06-06 16:42:49.380557)
Training loss at step 4200: 1.97 (2017-06-06 16:42:51.234013)
Training loss at step 4210: 2.36 (2017-06-06 16:42:53.195182)
Training loss at step 4220: 1.79 (2017-06-06 16:42:55.175294)
Training loss at step 4230: 1.85 (2017-06-06 16:42:57.135548)
Training loss at step 4240: 1.83 (2017-06-06 16:42:59.092647)
Training loss at step 4250: 1.82 (2017-06-06 16:43:01.091601)
Training loss at step 4260: 2.32 (2017-06-06 16:43:03.070521)
Training loss at step 4270: 1.90 (2017-06-06 16:43:05.100126)
Training loss at step 4280: 1.84 (2017-06-06 16:43:07.120639)
Training loss at step 4290: 2.00 (2017-06-06 16:43:09.082375)
Training loss at step 4300: 1.84 (2017-06-06 16:43:11.034677)
Training loss at step 4310: 2.34 (2017-06-06 16:43:12.912942)
Training loss at step 4320: 2.16 (2017-06-06 16:43:14.806904)
Training loss at step 4330: 1.85 (2017-06-06 16:43:16.746604)
Training loss at step 4340: 2.38 (2017-06-06 16:43:18.746438)
Training loss at step 4350: 1.86 (2017-06-06 16:43:20.703919)
Training loss at step 4360: 1.94 (2017-06-06 16:43:22.677582)
Training loss at step 4370: 1.60 (2017-06-06 16:43:24.544648)
Training loss at step 4380: 1.94 (2017-06-06 16:43:26.493361)
Training loss at step 4390: 1.72 (2017-06-06 16:43:28.467862)
Training loss at step 4400: 1.82 (2017-06-06 16:43:30.439949)
Model Saved!
Training loss at step 4410: 2.07 (2017-06-06 16:43:33.389036)
Training loss at step 4420: 1.50 (2017-06-06 16:43:35.352755)
Training loss at step 4430: 2.11 (2017-06-06 16:43:37.316470)
Training loss at step 4440: 1.97 (2017-06-06 16:43:39.282845)
Training loss at step 4450: 2.36 (2017-06-06 16:43:41.216738)
Training loss at step 4460: 2.04 (2017-06-06 16:43:43.097057)
Training loss at step 4470: 1.85 (2017-06-06 16:43:44.963461)
Training loss at step 4480: 2.18 (2017-06-06 16:43:46.819573)
Training loss at step 4490: 1.54 (2017-06-06 16:43:48.684995)
Training loss at step 4500: 1.98 (2017-06-06 16:43:50.539713)
Training loss at step 4510: 1.88 (2017-06-06 16:43:52.394744)
Training loss at step 4520: 1.73 (2017-06-06 16:43:54.247503)
Training loss at step 4530: 2.05 (2017-06-06 16:43:56.183108)
Training loss at step 4540: 1.85 (2017-06-06 16:43:58.103235)
Training loss at step 4550: 2.21 (2017-06-06 16:43:59.969834)
Training loss at step 4560: 1.78 (2017-06-06 16:44:01.894143)
Training loss at step 4570: 1.80 (2017-06-06 16:44:03.800265)
Training loss at step 4580: 1.88 (2017-06-06 16:44:05.774280)
Training loss at step 4590: 1.83 (2017-06-06 16:44:07.814482)
Training loss at step 4600: 1.58 (2017-06-06 16:44:09.756928)
Training loss at step 4610: 1.98 (2017-06-06 16:44:11.695483)
Training loss at step 4620: 1.63 (2017-06-06 16:44:13.671716)
Training loss at step 4630: 2.11 (2017-06-06 16:44:15.653956)
Training loss at step 4640: 2.32 (2017-06-06 16:44:17.611417)
Training loss at step 4650: 2.08 (2017-06-06 16:44:19.559188)
Training loss at step 4660: 2.12 (2017-06-06 16:44:21.418062)
Training loss at step 4670: 1.69 (2017-06-06 16:44:23.274624)
Training loss at step 4680: 2.11 (2017-06-06 16:44:25.132704)
Training loss at step 4690: 2.47 (2017-06-06 16:44:27.001279)
Training loss at step 4700: 1.96 (2017-06-06 16:44:28.857081)
Training loss at step 4710: 1.70 (2017-06-06 16:44:30.731751)
Training loss at step 4720: 2.20 (2017-06-06 16:44:32.654815)
Training loss at step 4730: 1.95 (2017-06-06 16:44:34.739414)
Training loss at step 4740: 2.09 (2017-06-06 16:44:36.699019)
Training loss at step 4750: 2.11 (2017-06-06 16:44:38.740121)
Training loss at step 4760: 1.87 (2017-06-06 16:44:40.759221)
Training loss at step 4770: 1.99 (2017-06-06 16:44:42.630963)
Training loss at step 4780: 1.81 (2017-06-06 16:44:44.518430)
Training loss at step 4790: 2.15 (2017-06-06 16:44:46.392156)
Training loss at step 4800: 2.23 (2017-06-06 16:44:48.347743)
Model Saved!
Training loss at step 4810: 1.98 (2017-06-06 16:44:50.998888)
Training loss at step 4820: 1.97 (2017-06-06 16:44:52.964404)
Training loss at step 4830: 1.83 (2017-06-06 16:44:54.831666)
Training loss at step 4840: 2.27 (2017-06-06 16:44:56.876595)
Training loss at step 4850: 2.40 (2017-06-06 16:44:58.841723)
Training loss at step 4860: 5.97 (2017-06-06 16:45:00.714369)
Training loss at step 4870: 2.08 (2017-06-06 16:45:02.567452)
Training loss at step 4880: 1.94 (2017-06-06 16:45:04.425845)
Training loss at step 4890: 1.88 (2017-06-06 16:45:06.419270)
Training loss at step 4900: 2.23 (2017-06-06 16:45:08.353252)
Training loss at step 4910: 1.66 (2017-06-06 16:45:10.349410)
Training loss at step 4920: 1.93 (2017-06-06 16:45:12.347848)
Training loss at step 4930: 2.01 (2017-06-06 16:45:14.347923)
Training loss at step 4940: 1.97 (2017-06-06 16:45:16.327711)
Training loss at step 4950: 2.78 (2017-06-06 16:45:18.270591)
Training loss at step 4960: 2.10 (2017-06-06 16:45:20.204815)
Training loss at step 4970: 2.04 (2017-06-06 16:45:22.116499)
Training loss at step 4980: 2.35 (2017-06-06 16:45:24.114007)
Training loss at step 4990: 1.77 (2017-06-06 16:45:26.077058)
Training loss at step 5000: 1.98 (2017-06-06 16:45:28.041104)
Training loss at step 5010: 2.10 (2017-06-06 16:45:30.011915)
Training loss at step 5020: 1.89 (2017-06-06 16:45:31.975407)
Training loss at step 5030: 1.90 (2017-06-06 16:45:33.936483)
Training loss at step 5040: 1.96 (2017-06-06 16:45:35.897761)
Training loss at step 5050: 2.03 (2017-06-06 16:45:37.856168)
Training loss at step 5060: 1.86 (2017-06-06 16:45:39.847523)
Training loss at step 5070: 1.88 (2017-06-06 16:45:41.834122)
Training loss at step 5080: 1.84 (2017-06-06 16:45:43.750424)
Training loss at step 5090: 2.10 (2017-06-06 16:45:45.634090)
Training loss at step 5100: 2.01 (2017-06-06 16:45:47.535843)
Training loss at step 5110: 2.00 (2017-06-06 16:45:49.413132)
Training loss at step 5120: 2.06 (2017-06-06 16:45:51.293737)
Training loss at step 5130: 2.42 (2017-06-06 16:45:53.169650)
Training loss at step 5140: 2.07 (2017-06-06 16:45:55.060581)
Training loss at step 5150: 1.95 (2017-06-06 16:45:56.954728)
Training loss at step 5160: 2.02 (2017-06-06 16:45:58.826710)
Training loss at step 5170: 1.91 (2017-06-06 16:46:00.705856)
Training loss at step 5180: 1.90 (2017-06-06 16:46:02.573956)
Training loss at step 5190: 2.19 (2017-06-06 16:46:04.443720)
Training loss at step 5200: 2.43 (2017-06-06 16:46:06.340450)
Model Saved!
Training loss at step 5210: 2.27 (2017-06-06 16:46:09.152240)
Training loss at step 5220: 1.89 (2017-06-06 16:46:11.114195)
Training loss at step 5230: 1.62 (2017-06-06 16:46:13.088499)
Training loss at step 5240: 1.88 (2017-06-06 16:46:15.045372)
Training loss at step 5250: 2.05 (2017-06-06 16:46:16.918300)
Training loss at step 5260: 1.79 (2017-06-06 16:46:18.771366)
Training loss at step 5270: 1.44 (2017-06-06 16:46:20.640879)
Training loss at step 5280: 1.86 (2017-06-06 16:46:22.508265)
Training loss at step 5290: 2.01 (2017-06-06 16:46:24.364722)
Training loss at step 5300: 2.48 (2017-06-06 16:46:26.220392)
Training loss at step 5310: 1.74 (2017-06-06 16:46:28.078594)
Training loss at step 5320: 2.21 (2017-06-06 16:46:29.950321)
Training loss at step 5330: 1.84 (2017-06-06 16:46:31.828934)
Training loss at step 5340: 1.66 (2017-06-06 16:46:33.692600)
Training loss at step 5350: 1.81 (2017-06-06 16:46:35.565374)
Training loss at step 5360: 1.65 (2017-06-06 16:46:37.453176)
Training loss at step 5370: 1.61 (2017-06-06 16:46:39.350646)
Training loss at step 5380: 1.52 (2017-06-06 16:46:41.213159)
Training loss at step 5390: 1.78 (2017-06-06 16:46:43.084957)
Training loss at step 5400: 1.52 (2017-06-06 16:46:44.953931)
Training loss at step 5410: 1.77 (2017-06-06 16:46:46.829089)
Training loss at step 5420: 1.73 (2017-06-06 16:46:48.712646)
Training loss at step 5430: 2.05 (2017-06-06 16:46:50.596501)
Training loss at step 5440: 2.06 (2017-06-06 16:46:52.466314)
Training loss at step 5450: 1.65 (2017-06-06 16:46:54.321598)
Training loss at step 5460: 1.76 (2017-06-06 16:46:56.281988)
Training loss at step 5470: 1.34 (2017-06-06 16:46:58.208391)
Training loss at step 5480: 1.93 (2017-06-06 16:47:00.098801)
Training loss at step 5490: 1.94 (2017-06-06 16:47:01.966564)
Training loss at step 5500: 1.74 (2017-06-06 16:47:03.836690)
Training loss at step 5510: 1.59 (2017-06-06 16:47:05.697434)
Training loss at step 5520: 2.21 (2017-06-06 16:47:07.561909)
Training loss at step 5530: 1.91 (2017-06-06 16:47:09.486433)
Training loss at step 5540: 2.15 (2017-06-06 16:47:11.446973)
Training loss at step 5550: 2.40 (2017-06-06 16:47:13.325134)
Training loss at step 5560: 1.83 (2017-06-06 16:47:15.196604)
Training loss at step 5570: 2.28 (2017-06-06 16:47:17.076720)
Training loss at step 5580: 2.28 (2017-06-06 16:47:18.952110)
Training loss at step 5590: 2.09 (2017-06-06 16:47:20.819678)
Training loss at step 5600: 1.99 (2017-06-06 16:47:22.793811)
Model Saved!
Training loss at step 5610: 2.56 (2017-06-06 16:47:26.034892)
Training loss at step 5620: 2.10 (2017-06-06 16:47:27.987423)
Training loss at step 5630: 2.00 (2017-06-06 16:47:29.925350)
Training loss at step 5640: 1.84 (2017-06-06 16:47:31.841283)
Training loss at step 5650: 1.81 (2017-06-06 16:47:33.797379)
Training loss at step 5660: 2.41 (2017-06-06 16:47:35.741531)
Training loss at step 5670: 2.03 (2017-06-06 16:47:37.656026)
Training loss at step 5680: 2.13 (2017-06-06 16:47:39.513968)
Training loss at step 5690: 2.43 (2017-06-06 16:47:41.368477)
Training loss at step 5700: 2.12 (2017-06-06 16:47:43.224966)
Training loss at step 5710: 1.98 (2017-06-06 16:47:45.094974)
Training loss at step 5720: 2.14 (2017-06-06 16:47:46.985397)
Training loss at step 5730: 2.11 (2017-06-06 16:47:48.871849)
Training loss at step 5740: 1.94 (2017-06-06 16:47:50.741102)
Training loss at step 5750: 1.85 (2017-06-06 16:47:52.619807)
Training loss at step 5760: 1.93 (2017-06-06 16:47:54.505188)
Training loss at step 5770: 2.32 (2017-06-06 16:47:56.488948)
Training loss at step 5780: 2.03 (2017-06-06 16:47:58.448097)
Training loss at step 5790: 1.94 (2017-06-06 16:48:00.383579)
Training loss at step 5800: 1.82 (2017-06-06 16:48:02.352082)
Training loss at step 5810: 2.04 (2017-06-06 16:48:04.326598)
Training loss at step 5820: 1.92 (2017-06-06 16:48:06.204826)
Training loss at step 5830: 2.00 (2017-06-06 16:48:08.165374)
Training loss at step 5840: 2.36 (2017-06-06 16:48:10.026811)
Training loss at step 5850: 1.80 (2017-06-06 16:48:11.881062)
Training loss at step 5860: 2.48 (2017-06-06 16:48:13.733546)
Training loss at step 5870: 2.46 (2017-06-06 16:48:15.586179)
Training loss at step 5880: 1.72 (2017-06-06 16:48:17.481305)
Training loss at step 5890: 1.94 (2017-06-06 16:48:19.359188)
Training loss at step 5900: 2.17 (2017-06-06 16:48:21.221473)
Training loss at step 5910: 2.05 (2017-06-06 16:48:23.082207)
Training loss at step 5920: 1.89 (2017-06-06 16:48:24.940454)
Training loss at step 5930: 1.76 (2017-06-06 16:48:26.796165)
Training loss at step 5940: 2.28 (2017-06-06 16:48:28.649705)
Training loss at step 5950: 2.07 (2017-06-06 16:48:30.581711)
Training loss at step 5960: 1.81 (2017-06-06 16:48:32.607061)
Training loss at step 5970: 1.88 (2017-06-06 16:48:34.639725)
Training loss at step 5980: 1.88 (2017-06-06 16:48:36.612939)
Training loss at step 5990: 1.97 (2017-06-06 16:48:38.567023)
Training loss at step 6000: 1.88 (2017-06-06 16:48:40.540937)
Model Saved!
Training loss at step 6010: 2.23 (2017-06-06 16:48:43.540710)
Training loss at step 6020: 1.91 (2017-06-06 16:48:45.437337)
Training loss at step 6030: 2.00 (2017-06-06 16:48:47.296543)
Training loss at step 6040: 1.78 (2017-06-06 16:48:49.154551)
Training loss at step 6050: 1.89 (2017-06-06 16:48:51.031839)
Training loss at step 6060: 2.46 (2017-06-06 16:48:52.900124)
Training loss at step 6070: 2.93 (2017-06-06 16:48:54.756121)
Training loss at step 6080: 2.12 (2017-06-06 16:48:56.611632)
Training loss at step 6090: 2.09 (2017-06-06 16:48:58.548292)
Training loss at step 6100: 1.89 (2017-06-06 16:49:00.488384)
Training loss at step 6110: 2.30 (2017-06-06 16:49:02.346263)
Training loss at step 6120: 2.01 (2017-06-06 16:49:04.198737)
Training loss at step 6130: 1.99 (2017-06-06 16:49:06.051450)
Training loss at step 6140: 1.87 (2017-06-06 16:49:07.924327)
Training loss at step 6150: 2.01 (2017-06-06 16:49:09.788974)
Training loss at step 6160: 1.95 (2017-06-06 16:49:11.663413)
Training loss at step 6170: 1.81 (2017-06-06 16:49:13.558571)
Training loss at step 6180: 2.08 (2017-06-06 16:49:15.448979)
Training loss at step 6190: 2.09 (2017-06-06 16:49:17.342317)
Training loss at step 6200: 1.96 (2017-06-06 16:49:19.230150)
Training loss at step 6210: 1.72 (2017-06-06 16:49:21.094083)
Training loss at step 6220: 1.82 (2017-06-06 16:49:22.956914)
Training loss at step 6230: 1.84 (2017-06-06 16:49:24.833995)
Training loss at step 6240: 1.84 (2017-06-06 16:49:26.727058)
Training loss at step 6250: 1.95 (2017-06-06 16:49:28.598243)
Training loss at step 6260: 1.64 (2017-06-06 16:49:30.463367)
Training loss at step 6270: 2.06 (2017-06-06 16:49:32.319379)
Training loss at step 6280: 2.05 (2017-06-06 16:49:34.174078)
Training loss at step 6290: 1.78 (2017-06-06 16:49:36.030339)
Training loss at step 6300: 2.23 (2017-06-06 16:49:37.888480)
Training loss at step 6310: 2.01 (2017-06-06 16:49:39.755758)
Training loss at step 6320: 2.06 (2017-06-06 16:49:41.687690)
Training loss at step 6330: 2.17 (2017-06-06 16:49:43.630410)
Training loss at step 6340: 2.11 (2017-06-06 16:49:45.602749)
Training loss at step 6350: 2.16 (2017-06-06 16:49:47.567563)
Training loss at step 6360: 1.66 (2017-06-06 16:49:49.532816)
Training loss at step 6370: 2.04 (2017-06-06 16:49:51.498872)
Training loss at step 6380: 2.52 (2017-06-06 16:49:53.460889)
Training loss at step 6390: 1.92 (2017-06-06 16:49:55.424918)
Training loss at step 6400: 1.94 (2017-06-06 16:49:57.387283)
Model Saved!
Training loss at step 6410: 1.83 (2017-06-06 16:50:00.134320)
Training loss at step 6420: 1.83 (2017-06-06 16:50:02.097472)
Training loss at step 6430: 2.40 (2017-06-06 16:50:04.063167)
Training loss at step 6440: 2.08 (2017-06-06 16:50:06.033418)
Training loss at step 6450: 1.61 (2017-06-06 16:50:07.999226)
Training loss at step 6460: 1.93 (2017-06-06 16:50:09.966184)
Training loss at step 6470: 1.89 (2017-06-06 16:50:11.946815)
Training loss at step 6480: 1.98 (2017-06-06 16:50:13.854024)
Training loss at step 6490: 1.95 (2017-06-06 16:50:15.726154)
Training loss at step 6500: 1.78 (2017-06-06 16:50:17.593216)
Training loss at step 6510: 2.13 (2017-06-06 16:50:19.456060)
Training loss at step 6520: 1.87 (2017-06-06 16:50:21.317013)
Training loss at step 6530: 2.21 (2017-06-06 16:50:23.180826)
Training loss at step 6540: 1.66 (2017-06-06 16:50:25.051561)
Training loss at step 6550: 1.89 (2017-06-06 16:50:26.919862)
Training loss at step 6560: 1.81 (2017-06-06 16:50:28.790517)
Training loss at step 6570: 1.50 (2017-06-06 16:50:30.658200)
Training loss at step 6580: 2.12 (2017-06-06 16:50:32.520896)
Training loss at step 6590: 2.06 (2017-06-06 16:50:34.375102)
Training loss at step 6600: 1.59 (2017-06-06 16:50:36.231097)
Training loss at step 6610: 1.86 (2017-06-06 16:50:38.088860)
Training loss at step 6620: 1.59 (2017-06-06 16:50:39.943575)
Training loss at step 6630: 1.75 (2017-06-06 16:50:41.799682)
Training loss at step 6640: 1.63 (2017-06-06 16:50:43.658174)
Training loss at step 6650: 2.01 (2017-06-06 16:50:45.512250)
Training loss at step 6660: 1.61 (2017-06-06 16:50:47.366495)
Training loss at step 6670: 1.69 (2017-06-06 16:50:49.284815)
Training loss at step 6680: 1.70 (2017-06-06 16:50:51.192400)
Training loss at step 6690: 2.02 (2017-06-06 16:50:53.166479)
Training loss at step 6700: 2.09 (2017-06-06 16:50:55.057184)
Training loss at step 6710: 1.90 (2017-06-06 16:50:56.938570)
Training loss at step 6720: 1.93 (2017-06-06 16:50:58.830195)
Training loss at step 6730: 1.85 (2017-06-06 16:51:00.694321)
Training loss at step 6740: 1.73 (2017-06-06 16:51:02.549982)
Training loss at step 6750: 1.60 (2017-06-06 16:51:04.404588)
Training loss at step 6760: 1.74 (2017-06-06 16:51:06.257422)
Training loss at step 6770: 1.96 (2017-06-06 16:51:08.125083)
Training loss at step 6780: 1.49 (2017-06-06 16:51:09.981469)
Training loss at step 6790: 1.73 (2017-06-06 16:51:11.840817)
Training loss at step 6800: 1.19 (2017-06-06 16:51:13.701952)
Model Saved!
Training loss at step 6810: 2.71 (2017-06-06 16:51:16.301049)
Training loss at step 6820: 2.20 (2017-06-06 16:51:18.167447)
Training loss at step 6830: 1.79 (2017-06-06 16:51:20.021446)
Training loss at step 6840: 1.79 (2017-06-06 16:51:21.965055)
Training loss at step 6850: 2.15 (2017-06-06 16:51:23.821972)
Training loss at step 6860: 1.76 (2017-06-06 16:51:25.674644)
Training loss at step 6870: 2.58 (2017-06-06 16:51:27.549052)
Training loss at step 6880: 1.83 (2017-06-06 16:51:29.433474)
Training loss at step 6890: 2.08 (2017-06-06 16:51:31.316894)
Training loss at step 6900: 2.56 (2017-06-06 16:51:33.189886)
Training loss at step 6910: 2.43 (2017-06-06 16:51:35.044126)
Training loss at step 6920: 1.69 (2017-06-06 16:51:36.898263)
Training loss at step 6930: 2.21 (2017-06-06 16:51:38.761214)
Training loss at step 6940: 2.40 (2017-06-06 16:51:40.613937)
Training loss at step 6950: 1.93 (2017-06-06 16:51:42.473357)
Training loss at step 6960: 1.74 (2017-06-06 16:51:44.328930)
Training loss at step 6970: 1.55 (2017-06-06 16:51:46.194051)
Training loss at step 6980: 2.05 (2017-06-06 16:51:48.067056)
Training loss at step 6990: 2.33 (2017-06-06 16:51:49.956207)

In [11]:
test_start = 'I plan to make the world a better place.'
with tf.Session(graph=graph) as sess:
    with tf.device("/cpu:0"):
        #init graph, load model
        tf.initialize_all_variables().run()
        model = tf.train.latest_checkpoint(checkpoint_directory)
        saver = tf.train.Saver()
        saver.restore(sess, model)

        test_data = tf.placeholder(tf.float32, shape=[1, char_size])
        test_output = tf.Variable(tf.zeros([1, hidden_nodes]))
        test_state = tf.Variable(tf.zeros([1, hidden_nodes]))

        reset_test_state = tf.group(test_output.assign(tf.zeros([1, hidden_nodes])), 
                                    test_state.assign(tf.zeros([1, hidden_nodes])))

        test_output, test_state = lstm(test_data, test_output, test_state)
        test_prediction = tf.nn.softmax(tf.matmul(test_output, w) + b)
        #set input variable to generate chars from
        reset_test_state.run() 
        test_generated = test_start

        #for every char in the input sentennce
        for i in range(len(test_start) - 1):
            #initialize an empty char store
            test_X = np.zeros((1, char_size))
            #store it in id from
            test_X[0, char2id[test_start[i]]] = 1.
            #feed it to model, test_prediction is the output value
            _ = sess.run(test_prediction, feed_dict={test_data: test_X})


        #where we store encoded char predictions
        test_X = np.zeros((1, char_size))
        test_X[0, char2id[test_start[-1]]] = 1.

        #lets generate 500 characters
        for i in range(500):
            #get each prediction probability
            prediction = test_prediction.eval({test_data: test_X})[0]
            #one hot encode it
            #print prediction
            next_char_one_hot = sample(prediction)
            #print next_char_one_hot
            #get the indices of the max values (highest probability)  and convert to char
            next_char = id2char[np.argmax(next_char_one_hot)]
            #add each char to the output text iteratively
            test_generated += next_char
            #update the 
            test_X = next_char_one_hot.reshape((1, char_size))
        print test_generated


I plan to make the world a better place. d wixtind clyesoune ofthoffind tw V , waroprediaroffof olid Sthis Sthe oriphtond thedaricr h ftrifmiptcherisededes wamed aspiour sthicthacee fseminmerilinipaffiguachin pz tice saydipasthe omind ch fin torinameds de 
 Ased 4 Sthintr Asce hirchadedewn tix aled S Pld Catweaf te s pine celindinie mede M7 Sald ftaly Aied 2 � frs ritherind N rersed ; @ sonde E fwiay ommpteredammerioamermelinichisp�5 oppld 
 Chive 7 qualorialy th Cesy . galacherorth Puaftr ,0 in sthalw wamed acherotonalmind shend apha

In [ ]: